/* Just some sanity to ensure that the scheduler is set up okay. */
ASSERT(current->id == IDLE_DOMAIN_ID);
domain_unpause_by_systemcontroller(current);
- __enter_scheduler();
+ raise_softirq(SCHEDULE_SOFTIRQ);
+ do_softirq();
/*
* Declares CPU setup done to the boot processor.
MEM_LOG("ptwr: Could not read pte at %p\n", ptep);
/*
* Really a bug. We could read this PTE during the initial fault,
- * and pagetables can't have changed meantime. XXX Multi-CPU guests?
+ * and pagetables can't have changed meantime.
*/
BUG();
}
MEM_LOG("ptwr: Could not update pte at %p\n", ptep);
/*
* Really a bug. We could write this PTE during the initial fault,
- * and pagetables can't have changed meantime. XXX Multi-CPU guests?
+ * and pagetables can't have changed meantime.
*/
BUG();
}
*pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
}
domain_crash();
+ return;
}
if ( unlikely(sl1e != NULL) )
/* Get the L2 index at which this L1 p.t. is always mapped. */
l2_idx = page->u.inuse.type_info & PGT_va_mask;
if ( unlikely(l2_idx >= PGT_va_unknown) )
+ {
domain_crash(); /* Urk! This L1 is mapped in multiple L2 slots! */
+ return 0;
+ }
l2_idx >>= PGT_va_shift;
if ( l2_idx == (addr >> L2_PAGETABLE_SHIFT) )
{
MEM_LOG("PTWR failure! Pagetable maps itself at %08lx\n", addr);
domain_crash();
+ return 0;
}
/*
unmap_domain_mem(ptwr_info[cpu].ptinfo[which].pl1e);
ptwr_info[cpu].ptinfo[which].l1va = 0;
domain_crash();
+ return 0;
}
return EXCRET_fault_fixed;
#ifndef NDEBUG
-void ptwr_status(void)
-{
- unsigned long pte, *ptep, pfn;
- struct pfn_info *page;
- int cpu = smp_processor_id();
-
- ptep = (unsigned long *)&linear_pg_table
- [ptwr_info[cpu].ptinfo[PTWR_PT_INACTIVE].l1va>>PAGE_SHIFT];
-
- if ( __get_user(pte, ptep) ) {
- MEM_LOG("ptwr: Could not read pte at %p\n", ptep);
- domain_crash();
- }
-
- pfn = pte >> PAGE_SHIFT;
- page = &frame_table[pfn];
- printk("need to alloc l1 page %p\n", page);
- /* make pt page writable */
- printk("need to make read-only l1-page at %p is %08lx\n",
- ptep, pte);
-
- if ( ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va == 0 )
- return;
-
- if ( __get_user(pte, (unsigned long *)
- ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va) ) {
- MEM_LOG("ptwr: Could not read pte at %p\n", (unsigned long *)
- ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va);
- domain_crash();
- }
- pfn = pte >> PAGE_SHIFT;
- page = &frame_table[pfn];
-}
-
void audit_domain(struct domain *d)
{
int ttot=0, ctot=0, io_mappings=0, lowmem_mappings=0;
&linear_pg_table[va >> PAGE_SHIFT])) )
{
SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
- shadow_unlock(m);
- return 0;
+ goto fail;
}
if ( unlikely(!(gpte & _PAGE_PRESENT)) )
{
SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
- shadow_unlock(m);
- return 0;
+ goto fail;
}
/* Write fault? */
{
/* Write fault on a read-only mapping. */
SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", gpte);
- shadow_unlock(m);
- return 0;
+ goto fail;
}
l1pte_write_fault(m, &gpte, &spte);
/* XXX Watch out for read-only L2 entries! (not used in Linux). */
if ( unlikely(__put_user(gpte, (unsigned long *)
&linear_pg_table[va >> PAGE_SHIFT])) )
+ {
domain_crash();
+ goto fail;
+ }
/*
* Update of shadow PTE can fail because the L1 p.t. is not shadowed,
check_pagetable(m, current->mm.pagetable, "post-sf");
return EXCRET_fault_fixed;
+
+ fail:
+ shadow_unlock(m);
+ return 0;
}
struct domain *d = current;
struct trap_bounce *tb = &d->thread.trap_bounce;
- DEBUGGER_trap_entry(TRAP_debug, regs);
-
__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
/* Mask out spurious debug traps due to lazy DR7 setting */
goto out;
}
+ DEBUGGER_trap_entry(TRAP_debug, regs);
+
if ( !GUEST_FAULT(regs) )
{
/* Clear TF just for absolute sanity. */
jmp test_all_events
DBLFIX1:GET_CURRENT(%ebx)
testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
- jnz domain_crash # cannot reenter failsafe code
+ jnz domain_crash_synchronous # cannot reenter failsafe code
orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
jmp test_all_events # will return via failsafe code
.previous
.long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
- .long DBLFLT2,domain_crash
+ .long DBLFLT2,domain_crash_synchronous
.previous
ALIGN
jmp test_all_events
.section __ex_table,"a"
- .long VFLT1,domain_crash
- .long VFLT2,domain_crash
- .long VFLT3,domain_crash
+ .long VFLT1,domain_crash_synchronous
+ .long VFLT2,domain_crash_synchronous
+ .long VFLT3,domain_crash_synchronous
.previous
.data
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
+#include <xen/softirq.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
send_guest_virq(dom0, VIRQ_DOM_EXC);
- __enter_scheduler();
- BUG();
+ raise_softirq(SCHEDULE_SOFTIRQ);
+}
+
+
+void domain_crash_synchronous(void)
+{
+ domain_crash();
+ for ( ; ; )
+ do_softirq();
}
void domain_shutdown(u8 reason)
}
}
- if ( reason == SHUTDOWN_crash )
- {
- domain_crash();
- BUG();
- }
-
- current->shutdown_code = reason;
- set_bit(DF_SHUTDOWN, &current->flags);
+ if ( (current->shutdown_code = reason) == SHUTDOWN_crash )
+ set_bit(DF_CRASHED, &current->flags);
+ else
+ set_bit(DF_SHUTDOWN, &current->flags);
send_guest_virq(dom0, VIRQ_DOM_EXC);
- __enter_scheduler();
+ raise_softirq(SCHEDULE_SOFTIRQ);
}
unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
NULL
};
-/* Operations for the current scheduler. */
+static void __enter_scheduler(void);
+
static struct scheduler ops;
#define SCHED_OP(fn, ...) \
* - deschedule the current domain (scheduler independent).
* - pick a new domain (scheduler dependent).
*/
-void __enter_scheduler(void)
+static void __enter_scheduler(void)
{
struct domain *prev = current, *next = NULL;
int cpu = prev->processor;
struct domain *find_last_domain(void);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
-extern void domain_crash(void);
extern void domain_shutdown(u8 reason);
+/*
+ * Mark current domain as crashed. This function returns; the domain is not
+ * synchronously descheduled from any processor.
+ */
+extern void domain_crash(void);
+
+/*
+ * Mark current domain as crashed and synchronously deschedule from the local
+ * processor. This function never returns.
+ */
+extern void domain_crash_synchronous(void) __attribute__((noreturn));
+
void new_thread(struct domain *d,
unsigned long start_pc,
unsigned long start_stack,
void domain_wake(struct domain *d);
void domain_sleep(struct domain *d);
-void __enter_scheduler(void);
-
extern void switch_to(struct domain *prev,
struct domain *next);